struct net_device *dev;
struct net_device_stats stats;
+ unsigned int carrier;
+
wait_queue_head_t waiting_to_free;
} netif_t;
+/*
+ * Implement our own carrier flag: the network stack's version causes delays
+ * when the carrier is re-enabled (in particular, dev_activate() may not
+ * immediately be called, which can cause packet loss; also the etherbridge
+ * can be rather lazy in activating its port).
+ */
+#define netback_carrier_on(netif) ((netif)->carrier = 1)
+#define netback_carrier_off(netif) ((netif)->carrier = 0)
+#define netback_carrier_ok(netif) ((netif)->carrier)
+
#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
void netif_xenbus_init(void);
-#define netif_schedulable(dev) (netif_running(dev) && netif_carrier_ok(dev))
+#define netif_schedulable(netif) \
+ (netif_running((netif)->dev) && netback_carrier_ok(netif))
void netif_schedule_work(netif_t *netif);
void netif_deschedule_work(netif_t *netif);
static int net_open(struct net_device *dev)
{
	netif_t *netif = netdev_priv(dev);
+	/*
+	 * Test the private netback carrier flag rather than the stack's
+	 * netif_carrier_ok(): the private flag is raised as soon as the
+	 * frontend connects, avoiding dev_activate() delays.
+	 */
-	if (netif_carrier_ok(dev))
+	if (netback_carrier_ok(netif)) {
		__netif_up(netif);
+		/* Only start TX once the peer is known to be able to receive. */
+		netif_start_queue(dev);
+	}
	return 0;
}
static int net_close(struct net_device *dev)
{
	netif_t *netif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (netback_carrier_ok(netif))
		__netif_down(netif);
+	/* Stop TX unconditionally: the device is going down either way. */
+	netif_stop_queue(dev);
	return 0;
}
return ERR_PTR(-ENOMEM);
}
- netif_carrier_off(dev);
-
netif = netdev_priv(dev);
memset(netif, 0, sizeof(*netif));
netif->domid = domid;
init_waitqueue_head(&netif->waiting_to_free);
netif->dev = dev;
+ netback_carrier_off(netif);
+
netif->credit_bytes = netif->remaining_credit = ~0UL;
netif->credit_usec = 0UL;
init_timer(&netif->credit_timeout);
netif_get(netif);
rtnl_lock();
- netif_carrier_on(netif->dev);
+ netback_carrier_on(netif);
if (netif_running(netif->dev))
__netif_up(netif);
rtnl_unlock();
void netif_disconnect(netif_t *netif)
{
- if (netif_carrier_ok(netif->dev)) {
+ if (netback_carrier_ok(netif)) {
rtnl_lock();
- netif_carrier_off(netif->dev);
+ netback_carrier_off(netif);
+ netif_carrier_off(netif->dev); /* discard queued packets */
if (netif_running(netif->dev))
__netif_down(netif);
rtnl_unlock();
#include <xen/balloon.h>
#include <xen/interface/memory.h>
/*#define NETBE_DEBUG_INTERRUPT*/
/* extra field used in struct page */
#define netif_page_index(pg) (*(long *)&(pg)->mapping)
static void tx_queue_callback(unsigned long data)
{
	netif_t *netif = (netif_t *)data;
+	/*
+	 * netif_schedulable() now takes the netif itself so it can consult
+	 * the private netback carrier flag.
+	 */
-	if (netif_schedulable(netif->dev))
+	if (netif_schedulable(netif))
		netif_wake_queue(netif->dev);
}
BUG_ON(skb->dev != dev);
/* Drop the packet if the target domain has no receive buffers. */
- if (unlikely(!netif_schedulable(dev) || netbk_queue_full(netif)))
+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
goto drop;
/*
}
if (netif_queue_stopped(netif->dev) &&
- netif_schedulable(netif->dev) &&
+ netif_schedulable(netif) &&
!netbk_queue_full(netif))
netif_wake_queue(netif->dev);
spin_lock_irq(&net_schedule_list_lock);
if (!__on_net_schedule_list(netif) &&
- likely(netif_schedulable(netif->dev))) {
+ likely(netif_schedulable(netif))) {
list_add_tail(&netif->list, &net_schedule_list);
netif_get(netif);
}
add_to_net_schedule_list_tail(netif);
maybe_schedule_tx_action();
- if (netif_schedulable(netif->dev) && !netbk_queue_full(netif))
+ if (netif_schedulable(netif) && !netbk_queue_full(netif))
netif_wake_queue(netif->dev);
return IRQ_HANDLED;
unsigned int irq;
unsigned int copying_receiver;
+ unsigned int carrier;
/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
+/*
+ * Implement our own carrier flag: the network stack's version causes delays
+ * when the carrier is re-enabled (in particular, dev_activate() may not
+ * immediately be called, which can cause packet loss).
+ */
+#define netfront_carrier_on(netif) ((netif)->carrier = 1)
+#define netfront_carrier_off(netif) ((netif)->carrier = 0)
+#define netfront_carrier_ok(netif) ((netif)->carrier)
+
/*
* Access macros for acquiring freeing slots in tx_skbs[].
*/
return dev_queue_xmit(skb);
}
+/*
+ * True when the TX ring still has room for a maximally fragmented skb
+ * (MAX_SKB_FRAGS frags plus slack for extra request slots).
+ */
+static inline int netfront_tx_slot_available(struct netfront_info *np)
+{
+	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
+		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+}
+
+/* Wake a stopped TX queue once ring slots have been reclaimed. */
+static inline void network_maybe_wake_tx(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+
+	if (unlikely(netif_queue_stopped(dev)) &&
+	    netfront_tx_slot_available(np) &&
+	    likely(netif_running(dev)))
+		netif_wake_queue(dev);
+}
+
static int network_open(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
memset(&np->stats, 0, sizeof(np->stats));
spin_lock(&np->rx_lock);
- if (netif_carrier_ok(dev)) {
+ if (netfront_carrier_ok(np)) {
network_alloc_rx_buffers(dev);
np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
}
spin_unlock(&np->rx_lock);
- netif_start_queue(dev);
+ network_maybe_wake_tx(dev);
return 0;
}
-static inline int netfront_tx_slot_available(struct netfront_info *np)
-{
- return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
- (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
-}
-
-static inline void network_maybe_wake_tx(struct net_device *dev)
-{
- struct netfront_info *np = netdev_priv(dev);
-
- if (unlikely(netif_queue_stopped(dev)) &&
- netfront_tx_slot_available(np) &&
- likely(netif_running(dev)))
- netif_wake_queue(dev);
-}
-
static void network_tx_buf_gc(struct net_device *dev)
{
RING_IDX cons, prod;
struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
- BUG_ON(!netif_carrier_ok(dev));
+ BUG_ON(!netfront_carrier_ok(np));
do {
prod = np->tx.sring->rsp_prod;
int nr_flips;
netif_rx_request_t *req;
- if (unlikely(!netif_carrier_ok(dev)))
+ if (unlikely(!netfront_carrier_ok(np)))
return;
/*
spin_lock_irq(&np->tx_lock);
- if (unlikely(!netif_carrier_ok(dev) ||
+ if (unlikely(!netfront_carrier_ok(np) ||
(frags > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(dev, skb))) {
spin_unlock_irq(&np->tx_lock);
spin_lock_irqsave(&np->tx_lock, flags);
- if (likely(netif_carrier_ok(dev))) {
+ if (likely(netfront_carrier_ok(np))) {
network_tx_buf_gc(dev);
/* Under tx_lock: protects access to rx shared-ring indexes. */
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
spin_lock(&np->rx_lock);
- if (unlikely(!netif_carrier_ok(dev))) {
+ if (unlikely(!netfront_carrier_ok(np))) {
spin_unlock(&np->rx_lock);
return 0;
}
work_done = 0;
while ((i != rp) && (work_done < budget)) {
memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
- memset(extras, 0, sizeof(extras));
+ memset(extras, 0, sizeof(rinfo.extras));
err = xennet_get_responses(np, &rinfo, rp, &tmpq,
&pages_flipped);
* domain a kick because we've probably just requeued some
* packets.
*/
- netif_carrier_on(dev);
+ netfront_carrier_on(np);
notify_remote_via_irq(np->irq);
network_tx_buf_gc(dev);
network_alloc_rx_buffers(dev);
np->netdev = netdev;
- netif_carrier_off(netdev);
+ netfront_carrier_off(np);
return netdev;
/* Stop old i/f to prevent errors whilst we rebuild the state. */
spin_lock_irq(&info->tx_lock);
spin_lock(&info->rx_lock);
- netif_carrier_off(info->netdev);
+ netfront_carrier_off(info);
spin_unlock(&info->rx_lock);
spin_unlock_irq(&info->tx_lock);